out:
free_vmcs(vmcs);
if(v->arch.arch_vmx.io_bitmap_a != 0) {
- free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
+ free_xenheap_pages(
+ v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
v->arch.arch_vmx.io_bitmap_a = 0;
}
if(v->arch.arch_vmx.io_bitmap_b != 0) {
- free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
+ free_xenheap_pages(
+ v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
v->arch.arch_vmx.io_bitmap_b = 0;
}
v->arch.arch_vmx.vmcs = 0;
BUG_ON(v->arch.arch_vmx.vmcs == NULL);
free_vmcs(v->arch.arch_vmx.vmcs);
if(v->arch.arch_vmx.io_bitmap_a != 0) {
- free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
+ free_xenheap_pages(
+ v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
v->arch.arch_vmx.io_bitmap_a = 0;
}
if(v->arch.arch_vmx.io_bitmap_b != 0) {
- free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
+ free_xenheap_pages(
+ v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
v->arch.arch_vmx.io_bitmap_b = 0;
}
v->arch.arch_vmx.vmcs = 0;
struct pfn_info *page;
unsigned int order;
/*
- * Allocate up to 2MB at a time:
- * 1. This prevents overflow of get_order() when allocating more than
- * 4GB to domain 0 on a PAE machine.
- * 2. It prevents allocating very large chunks from DMA pools before
- * the >4GB pool is fully depleted.
+ * Allocate up to 2MB at a time: It prevents allocating very large chunks
+ * from DMA pools before the >4GB pool is fully depleted.
*/
if ( max_pages > (2UL << (20 - PAGE_SHIFT)) )
max_pages = 2UL << (20 - PAGE_SHIFT);
- order = get_order(max_pages << PAGE_SHIFT);
+ order = get_order_from_pages(max_pages);
if ( (max_pages & (max_pages-1)) != 0 )
order--;
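/*
 * (Illustrative note added for this write-up, not part of the original
 * patch; assumes PAGE_SHIFT == 12.) The clamp above caps each request at
 * 2UL << (20 - PAGE_SHIFT) = 512 pages, i.e. 2MB. get_order_from_pages(512)
 * returns 9 (2^9 pages = 2MB); for a non-power-of-two request the order is
 * decremented so the chunk allocated never exceeds what was asked for.
 */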
while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
#endif
}
- order = get_order(v_end - dsi.v_start);
+ order = get_order_from_bytes(v_end - dsi.v_start);
if ( (1UL << order) > nr_pages )
panic("Domain 0 allocation is too small for kernel image.\n");
rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
vmcs_size = vmx_msr_high & 0x1fff;
- vmcs = alloc_xenheap_pages(get_order(vmcs_size));
+ vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size));
memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
vmcs->vmcs_revision_id = vmx_msr_low;
{
int order;
- order = get_order(vmcs_size);
+ order = get_order_from_bytes(vmcs_size);
free_xenheap_pages(vmcs, order);
}
error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
/* need to use 0x1000 instead of PAGE_SIZE */
- io_bitmap_a = (void*) alloc_xenheap_pages(get_order(0x1000));
- io_bitmap_b = (void*) alloc_xenheap_pages(get_order(0x1000));
+ io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
+ io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
memset(io_bitmap_a, 0xff, 0x1000);
/* don't bother debug port access */
clear_bit(PC_DEBUG_PORT, io_bitmap_a);
}
/* Set up mapping cache for domain pages. */
- mapcache_order = get_order(MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
+ mapcache_order = get_order_from_bytes(
+ MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
mapcache = alloc_xenheap_pages(mapcache_order);
memset(mapcache, 0, PAGE_SIZE << mapcache_order);
for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
reservation.nr_extents -= start_extent;
if ( (reservation.address_bits != 0) &&
- (reservation.address_bits < (get_order(max_page) + PAGE_SHIFT)) )
+ (reservation.address_bits <
+ (get_order_from_pages(max_page) + PAGE_SHIFT)) )
{
if ( reservation.address_bits < 31 )
return -ENOMEM;
}
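/*
 * (Illustrative note added for this write-up, not part of the original
 * patch; assumes PAGE_SHIFT == 12.) get_order_from_pages(max_page) +
 * PAGE_SHIFT is the number of address bits needed to cover all machine
 * memory, with the page count rounded up to a power of two. For example,
 * with 5GB of RAM max_page is 0x140000, get_order_from_pages(0x140000) is
 * 21, and the threshold is 33 bits; a caller restricted to 32-bit
 * addresses therefore enters the branch above (and, since 32 >= 31, is
 * not rejected outright).
 */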
nr_pages = num_online_cpus() * opt_tbuf_size;
- order = get_order(nr_pages * PAGE_SIZE);
+ order = get_order_from_pages(nr_pages);
if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
{
static void *xmalloc_whole_pages(size_t size)
{
struct xmalloc_hdr *hdr;
- unsigned int pageorder = get_order(size);
+ unsigned int pageorder = get_order_from_bytes(size);
hdr = alloc_xenheap_pages(pageorder);
if ( hdr == NULL )
/* Big allocs free directly. */
if ( hdr->size >= PAGE_SIZE )
{
- free_xenheap_pages(hdr, get_order(hdr->size));
+ free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
return;
}
if ( bytes == 0 )
return 0;
- order = get_order(bytes);
+ order = get_order_from_bytes(bytes);
debugtrace_buf = alloc_xenheap_pages(order);
ASSERT(debugtrace_buf != NULL);
void serial_async_transmit(struct serial_port *port)
{
BUG_ON(!port->driver->tx_empty);
- if ( !port->txbuf )
- port->txbuf = alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ));
+ if ( port->txbuf == NULL )
+ port->txbuf = alloc_xenheap_pages(
+ get_order_from_bytes(SERIAL_TXBUFSZ));
}
/*
#ifndef __ASSEMBLY__
-static __inline__ int get_order(unsigned long size)
+static inline int get_order_from_bytes(physaddr_t size)
{
int order;
size = (size-1) >> PAGE_SHIFT;
for ( order = 0; size; order++ )
size >>= 1;
return order;
}
+static inline int get_order_from_pages(unsigned long nr_pages)
+{
+ int order;
+ nr_pages--;
+ for ( order = 0; nr_pages; order++ )
+ nr_pages >>= 1;
+ return order;
+}
+
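/*
 * (Illustrative examples added for this write-up, not part of the patch;
 * assumes PAGE_SHIFT == 12.) Both helpers round up to the next
 * power-of-two number of pages:
 *   get_order_from_bytes(0x1000)  == 0   (one 4kB page)
 *   get_order_from_bytes(0x1001)  == 1   (two pages)
 *   get_order_from_pages(1)       == 0
 *   get_order_from_pages(3)       == 2   (four pages)
 * The old get_order() took a byte count in an unsigned long, so callers
 * that really had a page count had to shift left by PAGE_SHIFT first,
 * which could overflow on 32-bit builds with more than 4GB of memory.
 */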
/* Allocator functions for Xen pagetables. */
struct pfn_info *alloc_xen_pagetable(void);
void free_xen_pagetable(struct pfn_info *pg);